In [2]:
# http://www.glozman.com/textpages.html
# Harry Potter 1 - Sorcerer's Stone.txt
# Harry Potter 2 - Chamber of Secrets.txt
# Harry Potter 3 - The Prisoner Of Azkaban.txt
# Harry Potter 4 - The Goblet Of Fire.txt
# Harry Potter 5 - Order of the Phoenix.txt
# Harry Potter 6 - The Half Blood Prince.txt
# Harry Potter 7 - Deathly Hallows.txt
In [3]:
with open("texts/HarryPotter1-SorcerersStone.txt", "r") as f:
    text = f.read().lower()
In [4]:
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
'corpus length: {} unique chars: {}'.format(len(text), len(chars))
Out[4]:
In [5]:
print(text[:100])
Build the training data. Take 40 characters and save the 41st: we will teach the model that a given 40-character sequence should be followed by that 41st character. Use a step size of 3 so consecutive windows overlap and we get many more 40/41 samples.
In [6]:
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print("sequences: ", len(sentences))
In [7]:
print(sentences[0])
print(sentences[1])
In [8]:
print(next_chars[0])
One-hot encode the input sequences and their target characters.
In [9]:
import numpy as np
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)  # np.bool is deprecated in newer NumPy
y = np.zeros((len(sentences), len(chars)), dtype=bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
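As a sanity check (a minimal sketch using the arrays and index dicts built above), decode the first one-hot sample back to text and confirm it matches sentences[0].
In [ ]:
# Decode the first encoded window back to characters and compare with the original.
decoded = ''.join(indices_char[int(np.argmax(X[0, t]))] for t in range(maxlen))
assert decoded == sentences[0]
print(decoded, '->', indices_char[int(np.argmax(y[0]))])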
In [10]:
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
model = Sequential()
model.add(LSTM(256, recurrent_dropout=0.0, input_shape=(maxlen, len(chars)), return_sequences=True))
model.add(LSTM(256, recurrent_dropout=0.0, return_sequences=True))  # input shape is inferred from the layer above
model.add(LSTM(256, recurrent_dropout=0.0))  # last LSTM returns only the final timestep
model.add(Dense(2 * len(chars)))  # no activation between the two Dense layers, so together they act as one linear map
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
model.summary()
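For reference, the parameter counts that model.summary() reports can be checked by hand: a Keras LSTM layer has 4 gates, each with an input weight matrix, a recurrent weight matrix, and a bias (a quick arithmetic sketch, using only the shapes defined above).
In [ ]:
# 4 gates x (input weights + recurrent weights + bias) per LSTM layer.
def lstm_params(input_dim, units):
    return 4 * (input_dim * units + units * units + units)

print(lstm_params(len(chars), 256))  # first LSTM layer
print(lstm_params(256, 256))         # each stacked LSTM layer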
In [54]:
epochs = 100
batch_size = 512
model.fit(X, y, batch_size=batch_size, epochs=epochs)
Out[54]:
In [55]:
# model.save_weights("potter_lstm_weights_0568.h5")
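Rather than saving weights by hand after training finishes, a ModelCheckpoint callback can write them out automatically whenever the loss improves. This is a sketch; the filename pattern is illustrative, and the fit call is left commented out to avoid retraining.
In [ ]:
from keras.callbacks import ModelCheckpoint
# Save weights whenever training loss improves (filename pattern is an example).
checkpoint = ModelCheckpoint("potter_lstm_weights_{epoch:02d}.h5",
                             monitor='loss', save_best_only=True,
                             save_weights_only=True)
# model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks=[checkpoint])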
In [11]:
model.load_weights("potter_lstm_weights_0568.h5")
In [12]:
import random
def sample(preds, temperature=1.0):
    # Rescale the predicted distribution by temperature, renormalize, and draw one index.
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds + 1e-8) / temperature  # small epsilon guards against log(0)
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
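To see what temperature does before generating text, here is a quick illustration on a made-up four-character distribution (the probabilities are arbitrary): low temperature sharpens the distribution toward the most likely character, high temperature flattens it toward uniform.
In [ ]:
# Temperature < 1 sharpens the distribution; temperature > 1 flattens it.
toy = np.array([0.5, 0.3, 0.15, 0.05])
for temp in [0.2, 1.0, 2.0]:
    scaled = np.exp(np.log(toy) / temp)
    print(temp, np.round(scaled / scaled.sum(), 3))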
In [13]:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0]:
    print()
    print('----- diversity:', diversity)
    generated = ''
    sentence = text[start_index: start_index + maxlen]
    generated += sentence
    print('----- Generating with seed: "' + sentence + '"')
    sys.stdout.write(generated)
    for i in range(400):
        # One-hot encode the current window, predict the next char, then slide the window.
        x = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x[0, t, char_indices[char]] = 1.
        preds = model.predict(x, verbose=0)[0]
        next_index = sample(preds, diversity)
        next_char = indices_char[next_index]
        generated += next_char
        sentence = sentence[1:] + next_char
        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
In [ ]: